From 7607ba2da73d75631f7c5c9f6bb5495f78002ea8 Mon Sep 17 00:00:00 2001 From: "kaf24@firebug.cl.cam.ac.uk" Date: Thu, 9 Jun 2005 15:25:29 +0000 Subject: [PATCH] bitkeeper revision 1.1699.1.1 (42a85f6955KSFCuD5KSRtCwU-dzakQ) Clean up the page allocator interface a little. In particular physical addresses are now passed as physaddr_t rather than unsigned long (required for 32-bit pae mode). Signed-off-by: Keir Fraser --- xen/arch/ia64/domain.c | 8 +++-- xen/arch/ia64/patch/linux-2.6.7/mm_contig.c | 2 +- xen/arch/ia64/xenmem.c | 9 ++--- xen/arch/x86/apic.c | 10 +++--- xen/arch/x86/dom0_ops.c | 4 +-- xen/arch/x86/domain.c | 14 ++++---- xen/arch/x86/mm.c | 37 ++++++++++----------- xen/arch/x86/smpboot.c | 2 +- xen/arch/x86/vmx_vmcs.c | 6 ++-- xen/arch/x86/x86_32/mm.c | 8 ++--- xen/arch/x86/x86_64/mm.c | 10 +++--- xen/common/domain.c | 2 +- xen/common/grant_table.c | 14 ++++---- xen/common/page_alloc.c | 37 ++++++++++----------- xen/common/trace.c | 2 +- xen/common/xmalloc.c | 8 ++--- xen/drivers/char/console.c | 2 +- xen/drivers/char/serial.c | 2 +- xen/include/asm-x86/mm.h | 1 - xen/include/asm-x86/page.h | 2 +- xen/include/asm-x86/x86_32/page-2level.h | 2 +- xen/include/xen/mm.h | 21 ++++++------ 22 files changed, 100 insertions(+), 103 deletions(-) diff --git a/xen/arch/ia64/domain.c b/xen/arch/ia64/domain.c index bcaabbb023..4ddb1db5e5 100644 --- a/xen/arch/ia64/domain.c +++ b/xen/arch/ia64/domain.c @@ -680,7 +680,9 @@ void alloc_dom0(void) * Some old version linux, like 2.4, assumes physical memory existing * in 2nd 64M space. */ - dom0_start = alloc_boot_pages(dom0_size,dom0_align); + dom0_start = alloc_boot_pages( + dom0_size >> PAGE_SHIFT, dom0_align >> PAGE_SHIFT); + dom0_start <<= PAGE_SHIFT; if (!dom0_start) { printf("construct_dom0: can't allocate contiguous memory size=%p\n", dom0_size); @@ -698,7 +700,9 @@ void alloc_domU_staging(void) { domU_staging_size = 32*1024*1024; //FIXME: Should be configurable printf("alloc_domU_staging: starting (initializing %d MB...)\n",domU_staging_size/(1024*1024)); - domU_staging_start= alloc_boot_pages(domU_staging_size,domU_staging_align); + domU_staging_start = alloc_boot_pages( + domU_staging_size >> PAGE_SHIFT, domU_staging_align >> PAGE_SHIFT); + domU_staging_start <<= PAGE_SHIFT; if (!domU_staging_size) { printf("alloc_domU_staging: can't allocate, spinning...\n"); while(1); diff --git a/xen/arch/ia64/patch/linux-2.6.7/mm_contig.c b/xen/arch/ia64/patch/linux-2.6.7/mm_contig.c index d6308c051d..87c9f8b651 100644 --- a/xen/arch/ia64/patch/linux-2.6.7/mm_contig.c +++ b/xen/arch/ia64/patch/linux-2.6.7/mm_contig.c @@ -204,7 +204,7 @@ + + /* Request continuous trunk from boot allocator, since HV + * address is identity mapped */ -+ p = alloc_boot_pages(frame_table_size, FT_ALIGN_SIZE); ++ p = alloc_boot_pages(frame_table_size>>PAGE_SHIFT, FT_ALIGN_SIZE>>PAGE_SHIFT) << PAGE_SHIFT; + if (p == 0) + panic("Not enough memory for frame table.\n"); + diff --git a/xen/arch/ia64/xenmem.c b/xen/arch/ia64/xenmem.c index 6939fb4296..29788d2a3f 100644 --- a/xen/arch/ia64/xenmem.c +++ b/xen/arch/ia64/xenmem.c @@ -82,17 +82,18 @@ paging_init (void) #define FT_ALIGN_SIZE (16UL << 20) void __init init_frametable(void) { - unsigned long i, p; + unsigned long i, pfn; frame_table_size = max_page * sizeof(struct pfn_info); frame_table_size = (frame_table_size + PAGE_SIZE - 1) & PAGE_MASK; /* Request continuous trunk from boot allocator, since HV * address is identity mapped */ - p = alloc_boot_pages(frame_table_size, FT_ALIGN_SIZE); - if (p == 0) + pfn = 
alloc_boot_pages( + frame_table_size >> PAGE_SHIFT, FT_ALIGN_SIZE >> PAGE_SHIFT); + if (pfn == 0) panic("Not enough memory for frame table.\n"); - frame_table = __va(p); + frame_table = __va(pfn << PAGE_SHIFT); memset(frame_table, 0, frame_table_size); printk("size of frame_table: %lukB\n", frame_table_size >> 10); diff --git a/xen/arch/x86/apic.c b/xen/arch/x86/apic.c index dea77354a2..50497c55c7 100644 --- a/xen/arch/x86/apic.c +++ b/xen/arch/x86/apic.c @@ -580,10 +580,9 @@ void __init init_apic_mappings(void) * zeroes page to simulate the local APIC and another * one for the IO-APIC. */ - if (!smp_found_config && detect_init_APIC()) { - apic_phys = alloc_xenheap_page(); - apic_phys = __pa(apic_phys); - } else + if (!smp_found_config && detect_init_APIC()) + apic_phys = __pa(alloc_xenheap_page()); + else apic_phys = mp_lapic_addr; set_fixmap_nocache(FIX_APIC_BASE, apic_phys); @@ -616,8 +615,7 @@ void __init init_apic_mappings(void) } } else { fake_ioapic_page: - ioapic_phys = alloc_xenheap_page(); - ioapic_phys = __pa(ioapic_phys); + ioapic_phys = __pa(alloc_xenheap_page()); } set_fixmap_nocache(idx, ioapic_phys); apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n", diff --git a/xen/arch/x86/dom0_ops.c b/xen/arch/x86/dom0_ops.c index dbb723090d..e8979417ec 100644 --- a/xen/arch/x86/dom0_ops.c +++ b/xen/arch/x86/dom0_ops.c @@ -259,7 +259,7 @@ long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op) break; } - l_arr = (unsigned long *)alloc_xenheap_page(); + l_arr = alloc_xenheap_page(); ret = 0; for( n = 0; n < num; ) @@ -324,7 +324,7 @@ long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op) n += j; } - free_xenheap_page((unsigned long)l_arr); + free_xenheap_page(l_arr); put_domain(d); } diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c index ae4099a008..b892e15955 100644 --- a/xen/arch/x86/domain.c +++ b/xen/arch/x86/domain.c @@ -222,10 +222,10 @@ void arch_free_vcpu_struct(struct vcpu *v) void free_perdomain_pt(struct domain *d) { - free_xenheap_page((unsigned long)d->arch.mm_perdomain_pt); + free_xenheap_page(d->arch.mm_perdomain_pt); #ifdef __x86_64__ - free_xenheap_page((unsigned long)d->arch.mm_perdomain_l2); - free_xenheap_page((unsigned long)d->arch.mm_perdomain_l3); + free_xenheap_page(d->arch.mm_perdomain_l2); + free_xenheap_page(d->arch.mm_perdomain_l3); #endif } @@ -240,7 +240,7 @@ void arch_do_createdomain(struct vcpu *v) v->arch.schedule_tail = continue_nonidle_task; - d->shared_info = (void *)alloc_xenheap_page(); + d->shared_info = alloc_xenheap_page(); memset(d->shared_info, 0, PAGE_SIZE); v->vcpu_info = &d->shared_info->vcpu_data[v->vcpu_id]; v->cpumap = CPUMAP_RUNANYWHERE; @@ -248,7 +248,7 @@ void arch_do_createdomain(struct vcpu *v) machine_to_phys_mapping[virt_to_phys(d->shared_info) >> PAGE_SHIFT] = INVALID_M2P_ENTRY; - d->arch.mm_perdomain_pt = (l1_pgentry_t *)alloc_xenheap_page(); + d->arch.mm_perdomain_pt = alloc_xenheap_page(); memset(d->arch.mm_perdomain_pt, 0, PAGE_SIZE); machine_to_phys_mapping[virt_to_phys(d->arch.mm_perdomain_pt) >> PAGE_SHIFT] = INVALID_M2P_ENTRY; @@ -263,12 +263,12 @@ void arch_do_createdomain(struct vcpu *v) v->arch.guest_vl3table = __linear_l3_table; v->arch.guest_vl4table = __linear_l4_table; - d->arch.mm_perdomain_l2 = (l2_pgentry_t *)alloc_xenheap_page(); + d->arch.mm_perdomain_l2 = alloc_xenheap_page(); memset(d->arch.mm_perdomain_l2, 0, PAGE_SIZE); d->arch.mm_perdomain_l2[l2_table_offset(PERDOMAIN_VIRT_START)] = l2e_from_page(virt_to_page(d->arch.mm_perdomain_pt), __PAGE_HYPERVISOR); - d->arch.mm_perdomain_l3 = 
(l3_pgentry_t *)alloc_xenheap_page(); + d->arch.mm_perdomain_l3 = alloc_xenheap_page(); memset(d->arch.mm_perdomain_l3, 0, PAGE_SIZE); d->arch.mm_perdomain_l3[l3_table_offset(PERDOMAIN_VIRT_START)] = l3e_from_page(virt_to_page(d->arch.mm_perdomain_l2), diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c index 083d967e25..7fd5b8c579 100644 --- a/xen/arch/x86/mm.c +++ b/xen/arch/x86/mm.c @@ -145,31 +145,28 @@ static struct domain *dom_xen, *dom_io; /* Frame table and its size in pages. */ struct pfn_info *frame_table; -unsigned long frame_table_size; unsigned long max_page; void __init init_frametable(void) { - unsigned long i, p, step; + unsigned long nr_pages, page_step, i, pfn; - frame_table = (struct pfn_info *)FRAMETABLE_VIRT_START; - frame_table_size = max_page * sizeof(struct pfn_info); - frame_table_size = (frame_table_size + PAGE_SIZE - 1) & PAGE_MASK; + frame_table = (struct pfn_info *)FRAMETABLE_VIRT_START; - step = (1 << L2_PAGETABLE_SHIFT); - for ( i = 0; i < frame_table_size; i += step ) + nr_pages = PFN_UP(max_page * sizeof(*frame_table)); + page_step = (1 << L2_PAGETABLE_SHIFT) >> PAGE_SHIFT; + + for ( i = 0; i < nr_pages; i += page_step ) { - p = alloc_boot_pages(min(frame_table_size - i, step), step); - if ( p == 0 ) + pfn = alloc_boot_pages(min(nr_pages - i, page_step), page_step); + if ( pfn == 0 ) panic("Not enough memory for frame table\n"); map_pages_to_xen( - FRAMETABLE_VIRT_START + i, - p >> PAGE_SHIFT, - step >> PAGE_SHIFT, - PAGE_HYPERVISOR); + FRAMETABLE_VIRT_START + (i << PAGE_SHIFT), + pfn, page_step, PAGE_HYPERVISOR); } - memset(frame_table, 0, frame_table_size); + memset(frame_table, 0, nr_pages << PAGE_SHIFT); } void arch_init_memory(void) @@ -2954,15 +2951,15 @@ int ptwr_do_page_fault(struct domain *d, unsigned long addr) int ptwr_init(struct domain *d) { - void *x = (void *)alloc_xenheap_page(); - void *y = (void *)alloc_xenheap_page(); + void *x = alloc_xenheap_page(); + void *y = alloc_xenheap_page(); if ( (x == NULL) || (y == NULL) ) { if ( x != NULL ) - free_xenheap_page((unsigned long)x); + free_xenheap_page(x); if ( y != NULL ) - free_xenheap_page((unsigned long)y); + free_xenheap_page(y); return -ENOMEM; } @@ -2975,8 +2972,8 @@ int ptwr_init(struct domain *d) void ptwr_destroy(struct domain *d) { cleanup_writable_pagetable(d); - free_xenheap_page((unsigned long)d->arch.ptwr[PTWR_PT_ACTIVE].page); - free_xenheap_page((unsigned long)d->arch.ptwr[PTWR_PT_INACTIVE].page); + free_xenheap_page(d->arch.ptwr[PTWR_PT_ACTIVE].page); + free_xenheap_page(d->arch.ptwr[PTWR_PT_INACTIVE].page); } void cleanup_writable_pagetable(struct domain *d) diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c index f276e9632e..80fe8122a4 100644 --- a/xen/arch/x86/smpboot.c +++ b/xen/arch/x86/smpboot.c @@ -781,7 +781,7 @@ static int __init do_boot_cpu(int apicid) /* So we see what's up */ printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip); - stack = (void *)alloc_xenheap_pages(STACK_ORDER); + stack = alloc_xenheap_pages(STACK_ORDER); #if defined(__i386__) stack_start.esp = (void *)__pa(stack); #elif defined(__x86_64__) diff --git a/xen/arch/x86/vmx_vmcs.c b/xen/arch/x86/vmx_vmcs.c index 5fed12c279..acaa8f6d5c 100644 --- a/xen/arch/x86/vmx_vmcs.c +++ b/xen/arch/x86/vmx_vmcs.c @@ -41,8 +41,8 @@ struct vmcs_struct *alloc_vmcs(void) rdmsr(MSR_IA32_VMX_BASIC_MSR, vmx_msr_low, vmx_msr_high); vmcs_size = vmx_msr_high & 0x1fff; - vmcs = (struct vmcs_struct *) alloc_xenheap_pages(get_order(vmcs_size)); - memset((char *) vmcs, 0, vmcs_size); /* don't remove this 
*/ + vmcs = alloc_xenheap_pages(get_order(vmcs_size)); + memset((char *)vmcs, 0, vmcs_size); /* don't remove this */ vmcs->vmcs_revision_id = vmx_msr_low; return vmcs; @@ -53,7 +53,7 @@ void free_vmcs(struct vmcs_struct *vmcs) int order; order = (vmcs_size >> PAGE_SHIFT) - 1; - free_xenheap_pages((unsigned long) vmcs, order); + free_xenheap_pages(vmcs, order); } static inline int construct_vmcs_controls(void) diff --git a/xen/arch/x86/x86_32/mm.c b/xen/arch/x86/x86_32/mm.c index 686064baa4..b388c1cc49 100644 --- a/xen/arch/x86/x86_32/mm.c +++ b/xen/arch/x86/x86_32/mm.c @@ -43,7 +43,7 @@ struct pfn_info *alloc_xen_pagetable(void) if ( !early_boot ) { - void *v = (void *)alloc_xenheap_page(); + void *v = alloc_xenheap_page(); return ((v == NULL) ? NULL : virt_to_page(v)); } @@ -54,7 +54,7 @@ struct pfn_info *alloc_xen_pagetable(void) void free_xen_pagetable(struct pfn_info *pg) { - free_xenheap_page((unsigned long)page_to_virt(pg)); + free_xenheap_page(page_to_virt(pg)); } l2_pgentry_t *virt_to_xen_l2e(unsigned long v) @@ -113,7 +113,7 @@ void __init paging_init(void) /* Create page tables for ioremap(). */ for ( i = 0; i < (IOREMAP_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ ) { - ioremap_pt = (void *)alloc_xenheap_page(); + ioremap_pt = alloc_xenheap_page(); clear_page(ioremap_pt); idle_pg_table_l2[l2_linear_offset(IOREMAP_VIRT_START) + i] = l2e_from_page(virt_to_page(ioremap_pt), __PAGE_HYPERVISOR); @@ -121,7 +121,7 @@ void __init paging_init(void) /* Set up mapping cache for domain pages. */ mapcache_order = get_order(MAPCACHE_MBYTES << (20 - PAGETABLE_ORDER)); - mapcache = (l1_pgentry_t *)alloc_xenheap_pages(mapcache_order); + mapcache = alloc_xenheap_pages(mapcache_order); memset(mapcache, 0, PAGE_SIZE << mapcache_order); for ( i = 0; i < (MAPCACHE_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ ) idle_pg_table_l2[l2_linear_offset(MAPCACHE_VIRT_START) + i] = diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c index 66d3e96d61..ae3dac0b6b 100644 --- a/xen/arch/x86/x86_64/mm.c +++ b/xen/arch/x86/x86_64/mm.c @@ -32,13 +32,13 @@ struct pfn_info *alloc_xen_pagetable(void) { extern int early_boot; - unsigned long p; + unsigned long pfn; if ( !early_boot ) return alloc_domheap_page(NULL); - p = alloc_boot_pages(PAGE_SIZE, PAGE_SIZE); - return ((p == 0) ? NULL : phys_to_page(p)); + pfn = alloc_boot_pages(1, 1); + return ((pfn == 0) ? NULL : pfn_to_page(pfn)); } void free_xen_pagetable(struct pfn_info *pg) @@ -82,12 +82,12 @@ void __init paging_init(void) idle0_vcpu.arch.monitor_table = mk_pagetable(__pa(idle_pg_table)); /* Create user-accessible L2 directory to map the MPT for guests. 
*/ - l3_ro_mpt = (l3_pgentry_t *)alloc_xenheap_page(); + l3_ro_mpt = alloc_xenheap_page(); clear_page(l3_ro_mpt); idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)] = l4e_from_page( virt_to_page(l3_ro_mpt), __PAGE_HYPERVISOR | _PAGE_USER); - l2_ro_mpt = (l2_pgentry_t *)alloc_xenheap_page(); + l2_ro_mpt = alloc_xenheap_page(); clear_page(l2_ro_mpt); l3_ro_mpt[l3_table_offset(RO_MPT_VIRT_START)] = l3e_from_page( diff --git a/xen/common/domain.c b/xen/common/domain.c index 5c5214926c..b11ec069fa 100644 --- a/xen/common/domain.c +++ b/xen/common/domain.c @@ -266,7 +266,7 @@ void domain_destruct(struct domain *d) grant_table_destroy(d); free_perdomain_pt(d); - free_xenheap_page((unsigned long)d->shared_info); + free_xenheap_page(d->shared_info); free_domain_struct(d); diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c index e75a062f88..683a051df3 100644 --- a/xen/common/grant_table.c +++ b/xen/common/grant_table.c @@ -375,7 +375,7 @@ __gnttab_map_grant_ref( grant_table_t *lgt = ld->grant_table; /* Grow the maptrack table. */ - new_mt = (void *)alloc_xenheap_pages(lgt->maptrack_order + 1); + new_mt = alloc_xenheap_pages(lgt->maptrack_order + 1); if ( new_mt == NULL ) { put_domain(rd); @@ -388,7 +388,7 @@ __gnttab_map_grant_ref( for ( i = lgt->maptrack_limit; i < (lgt->maptrack_limit << 1); i++ ) new_mt[i].ref_and_flags = (i+1) << MAPTRACK_REF_SHIFT; - free_xenheap_pages((unsigned long)lgt->maptrack, lgt->maptrack_order); + free_xenheap_pages(lgt->maptrack, lgt->maptrack_order); lgt->maptrack = new_mt; lgt->maptrack_order += 1; lgt->maptrack_limit <<= 1; @@ -1095,7 +1095,7 @@ grant_table_create( memset(t->active, 0, sizeof(active_grant_entry_t) * NR_GRANT_ENTRIES); /* Tracking of mapped foreign frames table */ - if ( (t->maptrack = (void *)alloc_xenheap_page()) == NULL ) + if ( (t->maptrack = alloc_xenheap_page()) == NULL ) goto no_mem; t->maptrack_order = 0; t->maptrack_limit = PAGE_SIZE / sizeof(grant_mapping_t); @@ -1104,7 +1104,7 @@ grant_table_create( t->maptrack[i].ref_and_flags = (i+1) << MAPTRACK_REF_SHIFT; /* Shared grant table. */ - t->shared = (void *)alloc_xenheap_pages(ORDER_GRANT_FRAMES); + t->shared = alloc_xenheap_pages(ORDER_GRANT_FRAMES); if ( t->shared == NULL ) goto no_mem; memset(t->shared, 0, NR_GRANT_FRAMES * PAGE_SIZE); @@ -1127,7 +1127,7 @@ grant_table_create( { xfree(t->active); if ( t->maptrack != NULL ) - free_xenheap_page((unsigned long)t->maptrack); + free_xenheap_page(t->maptrack); xfree(t); } return -ENOMEM; @@ -1216,8 +1216,8 @@ grant_table_destroy( { /* Free memory relating to this grant table. */ d->grant_table = NULL; - free_xenheap_pages((unsigned long)t->shared, ORDER_GRANT_FRAMES); - free_xenheap_page((unsigned long)t->maptrack); + free_xenheap_pages(t->shared, ORDER_GRANT_FRAMES); + free_xenheap_page(t->maptrack); xfree(t->active); xfree(t); } diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c index 610fbdb020..527870de37 100644 --- a/xen/common/page_alloc.c +++ b/xen/common/page_alloc.c @@ -133,7 +133,7 @@ static void map_free(unsigned long first_page, unsigned long nr_pages) */ /* Initialise allocator to handle up to @max_page pages. 
*/ -unsigned long init_boot_allocator(unsigned long bitmap_start) +physaddr_t init_boot_allocator(physaddr_t bitmap_start) { bitmap_start = round_pgup(bitmap_start); @@ -148,7 +148,7 @@ unsigned long init_boot_allocator(unsigned long bitmap_start) return bitmap_start + bitmap_size; } -void init_boot_pages(unsigned long ps, unsigned long pe) +void init_boot_pages(physaddr_t ps, physaddr_t pe) { unsigned long bad_pfn; char *p; @@ -179,23 +179,20 @@ void init_boot_pages(unsigned long ps, unsigned long pe) } } -unsigned long alloc_boot_pages(unsigned long size, unsigned long align) +unsigned long alloc_boot_pages(unsigned long nr_pfns, unsigned long pfn_align) { unsigned long pg, i; - size = round_pgup(size) >> PAGE_SHIFT; - align = round_pgup(align) >> PAGE_SHIFT; - - for ( pg = 0; (pg + size) < (bitmap_size*8); pg += align ) + for ( pg = 0; (pg + nr_pfns) < (bitmap_size*8); pg += pfn_align ) { - for ( i = 0; i < size; i++ ) + for ( i = 0; i < nr_pfns; i++ ) if ( allocated_in_map(pg + i) ) break; - if ( i == size ) + if ( i == nr_pfns ) { - map_alloc(pg, size); - return pg << PAGE_SHIFT; + map_alloc(pg, nr_pfns); + return pg; } } @@ -402,14 +399,14 @@ void scrub_heap_pages(void) * XEN-HEAP SUB-ALLOCATOR */ -void init_xenheap_pages(unsigned long ps, unsigned long pe) +void init_xenheap_pages(physaddr_t ps, physaddr_t pe) { unsigned long flags; ps = round_pgup(ps); pe = round_pgdown(pe); - memguard_guard_range(__va(ps), pe - ps); + memguard_guard_range(phys_to_virt(ps), pe - ps); /* * Yuk! Ensure there is a one-page buffer between Xen and Dom zones, to @@ -424,7 +421,7 @@ void init_xenheap_pages(unsigned long ps, unsigned long pe) } -unsigned long alloc_xenheap_pages(unsigned int order) +void *alloc_xenheap_pages(unsigned int order) { unsigned long flags; struct pfn_info *pg; @@ -446,22 +443,22 @@ unsigned long alloc_xenheap_pages(unsigned int order) pg[i].u.inuse.type_info = 0; } - return (unsigned long)page_to_virt(pg); + return page_to_virt(pg); no_memory: printk("Cannot handle page request order %d!\n", order); - return 0; + return NULL; } -void free_xenheap_pages(unsigned long p, unsigned int order) +void free_xenheap_pages(void *v, unsigned int order) { unsigned long flags; - memguard_guard_range((void *)p, 1 << (order + PAGE_SHIFT)); + memguard_guard_range(v, 1 << (order + PAGE_SHIFT)); local_irq_save(flags); - free_heap_pages(MEMZONE_XEN, virt_to_page(p), order); + free_heap_pages(MEMZONE_XEN, virt_to_page(v), order); local_irq_restore(flags); } @@ -471,7 +468,7 @@ void free_xenheap_pages(unsigned long p, unsigned int order) * DOMAIN-HEAP SUB-ALLOCATOR */ -void init_domheap_pages(unsigned long ps, unsigned long pe) +void init_domheap_pages(physaddr_t ps, physaddr_t pe) { ASSERT(!in_irq()); diff --git a/xen/common/trace.c b/xen/common/trace.c index 291d35dd28..034dbb4d5d 100644 --- a/xen/common/trace.c +++ b/xen/common/trace.c @@ -68,7 +68,7 @@ void init_trace_bufs(void) nr_pages = num_online_cpus() * opt_tbuf_size; order = get_order(nr_pages * PAGE_SIZE); - if ( (rawbuf = (char *)alloc_xenheap_pages(order)) == NULL ) + if ( (rawbuf = alloc_xenheap_pages(order)) == NULL ) { printk("Xen trace buffers: memory allocation failed\n"); return; diff --git a/xen/common/xmalloc.c b/xen/common/xmalloc.c index de4831e18c..3cfea23101 100644 --- a/xen/common/xmalloc.c +++ b/xen/common/xmalloc.c @@ -71,7 +71,7 @@ static void *xmalloc_new_page(size_t size) struct xmalloc_hdr *hdr; unsigned long flags; - hdr = (struct xmalloc_hdr *)alloc_xenheap_pages(0); + hdr = alloc_xenheap_page(); if ( hdr == NULL ) 
return NULL; @@ -88,7 +88,7 @@ static void *xmalloc_whole_pages(size_t size) struct xmalloc_hdr *hdr; unsigned int pageorder = get_order(size); - hdr = (struct xmalloc_hdr *)alloc_xenheap_pages(pageorder); + hdr = alloc_xenheap_pages(pageorder); if ( hdr == NULL ) return NULL; @@ -157,7 +157,7 @@ void xfree(const void *p) /* Big allocs free directly. */ if ( hdr->size >= PAGE_SIZE ) { - free_xenheap_pages((unsigned long)hdr, get_order(hdr->size)); + free_xenheap_pages(hdr, get_order(hdr->size)); return; } @@ -192,7 +192,7 @@ void xfree(const void *p) if ( hdr->size == PAGE_SIZE ) { BUG_ON((((unsigned long)hdr) & (PAGE_SIZE-1)) != 0); - free_xenheap_pages((unsigned long)hdr, 0); + free_xenheap_pages(hdr, 0); } else { diff --git a/xen/drivers/char/console.c b/xen/drivers/char/console.c index 5ade44b42b..f8fe1d69e0 100644 --- a/xen/drivers/char/console.c +++ b/xen/drivers/char/console.c @@ -628,7 +628,7 @@ static int __init debugtrace_init(void) return 0; order = get_order(bytes); - debugtrace_buf = (char *)alloc_xenheap_pages(order); + debugtrace_buf = alloc_xenheap_pages(order); ASSERT(debugtrace_buf != NULL); memset(debugtrace_buf, '\0', bytes); diff --git a/xen/drivers/char/serial.c b/xen/drivers/char/serial.c index a59146da0a..bec789e084 100644 --- a/xen/drivers/char/serial.c +++ b/xen/drivers/char/serial.c @@ -363,7 +363,7 @@ void serial_async_transmit(struct serial_port *port) { BUG_ON(!port->driver->tx_empty); if ( !port->txbuf ) - port->txbuf = (char *)alloc_xenheap_pages(get_order(SERIAL_TXBUFSZ)); + port->txbuf = alloc_xenheap_pages(get_order(SERIAL_TXBUFSZ)); } /* diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h index b8ebd1e372..47793cb2d3 100644 --- a/xen/include/asm-x86/mm.h +++ b/xen/include/asm-x86/mm.h @@ -141,7 +141,6 @@ static inline u32 pickle_domptr(struct domain *domain) } while ( 0 ) extern struct pfn_info *frame_table; -extern unsigned long frame_table_size; extern unsigned long max_page; void init_frametable(void); diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h index 883acd13dd..dd777263c1 100644 --- a/xen/include/asm-x86/page.h +++ b/xen/include/asm-x86/page.h @@ -7,7 +7,7 @@ #else #define PAGE_SIZE (1 << PAGE_SHIFT) #endif -#define PAGE_MASK (~(PAGE_SIZE-1)) +#define PAGE_MASK (~(intpte_t)(PAGE_SIZE-1)) #define PAGE_FLAG_MASK (~0U) #ifndef __ASSEMBLY__ diff --git a/xen/include/asm-x86/x86_32/page-2level.h b/xen/include/asm-x86/x86_32/page-2level.h index 9659ce0430..34128f24fa 100644 --- a/xen/include/asm-x86/x86_32/page-2level.h +++ b/xen/include/asm-x86/x86_32/page-2level.h @@ -48,7 +48,7 @@ typedef l2_pgentry_t root_pgentry_t; /* Extract flags into 12-bit integer, or turn 12-bit flags into a pte mask. */ #define get_pte_flags(x) ((int)(x) & 0xFFF) -#define put_pte_flags(x) ((intpte_t)(x)) +#define put_pte_flags(x) ((intpte_t)((x) & 0xFFF)) #define L1_DISALLOW_MASK (0xFFFFF180U) /* PAT/GLOBAL */ #define L2_DISALLOW_MASK (0xFFFFF180U) /* PSE/GLOBAL */ diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h index 4e7f570643..1919b5e9e7 100644 --- a/xen/include/xen/mm.h +++ b/xen/include/xen/mm.h @@ -3,6 +3,7 @@ #define __XEN_MM_H__ #include +#include #include #include @@ -10,9 +11,9 @@ struct domain; struct pfn_info; /* Boot-time allocator. Turns into generic allocator after bootstrap. 
*/ -unsigned long init_boot_allocator(unsigned long bitmap_start); -void init_boot_pages(unsigned long ps, unsigned long pe); -unsigned long alloc_boot_pages(unsigned long size, unsigned long align); +physaddr_t init_boot_allocator(physaddr_t bitmap_start); +void init_boot_pages(physaddr_t ps, physaddr_t pe); +unsigned long alloc_boot_pages(unsigned long nr_pfns, unsigned long pfn_align); void end_boot_allocator(void); /* Generic allocator. These functions are *not* interrupt-safe. */ @@ -24,19 +25,19 @@ void free_heap_pages( void scrub_heap_pages(void); /* Xen suballocator. These functions are interrupt-safe. */ -void init_xenheap_pages(unsigned long ps, unsigned long pe); -unsigned long alloc_xenheap_pages(unsigned int order); -void free_xenheap_pages(unsigned long p, unsigned int order); +void init_xenheap_pages(physaddr_t ps, physaddr_t pe); +void *alloc_xenheap_pages(unsigned int order); +void free_xenheap_pages(void *v, unsigned int order); #define alloc_xenheap_page() (alloc_xenheap_pages(0)) -#define free_xenheap_page(_p) (free_xenheap_pages(_p,0)) +#define free_xenheap_page(v) (free_xenheap_pages(v,0)) /* Domain suballocator. These functions are *not* interrupt-safe.*/ -void init_domheap_pages(unsigned long ps, unsigned long pe); +void init_domheap_pages(physaddr_t ps, physaddr_t pe); struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order); void free_domheap_pages(struct pfn_info *pg, unsigned int order); unsigned long avail_domheap_pages(void); -#define alloc_domheap_page(_d) (alloc_domheap_pages(_d,0)) -#define free_domheap_page(_p) (free_domheap_pages(_p,0)) +#define alloc_domheap_page(d) (alloc_domheap_pages(d,0)) +#define free_domheap_page(p) (free_domheap_pages(p,0)) /* Automatic page scrubbing for dead domains. */ extern struct list_head page_scrub_list; -- 2.30.2
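
For reference, below is a minimal standalone sketch (not Xen code) of the caller-side conventions this patch establishes: alloc_boot_pages() now takes a page count and an alignment in frames and returns a PFN rather than a physical address, and the xenheap allocators traffic in void * instead of unsigned long virtual addresses. Only the signatures mirror the updated xen/include/xen/mm.h; the allocator bodies are hypothetical stubs added so the example builds on its own.

/*
 * Minimal sketch of the caller-side conventions after this patch.
 * Only the signatures below mirror xen/include/xen/mm.h as changed above;
 * the allocator bodies are stand-in stubs (assumptions, not Xen code) so
 * that the example compiles and runs on its own.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

typedef uint64_t physaddr_t;  /* wide enough for 32-bit PAE addresses */

/* Boot allocator: now deals in page frame numbers, not byte sizes. */
static unsigned long alloc_boot_pages(unsigned long nr_pfns,
                                      unsigned long pfn_align)
{
    static unsigned long next_pfn = 0x100; /* pretend frames from 0x100 up are free */
    unsigned long pfn = (next_pfn + pfn_align - 1) & ~(pfn_align - 1);
    next_pfn = pfn + nr_pfns;
    return pfn;
}

/* Xenheap allocator: now returns and accepts void *, not unsigned long. */
static void *alloc_xenheap_pages(unsigned int order)
{
    return calloc(1, PAGE_SIZE << order);
}

static void free_xenheap_pages(void *v, unsigned int order)
{
    (void)order;
    free(v);
}

#define alloc_xenheap_page()  (alloc_xenheap_pages(0))
#define free_xenheap_page(v)  (free_xenheap_pages(v, 0))

int main(void)
{
    /*
     * Old callers passed byte sizes/alignments and got a physical address
     * back; new callers convert to pages up front and shift the returned
     * PFN themselves when a physical address is needed (as the ia64 and
     * x86 hunks above now do).
     */
    unsigned long table_bytes = 3 * PAGE_SIZE + 123;
    unsigned long nr_pages = (table_bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
    unsigned long pfn = alloc_boot_pages(nr_pages, 1);
    physaddr_t paddr = (physaddr_t)pfn << PAGE_SHIFT;
    printf("boot alloc: %lu pages at pfn %#lx (paddr %#llx)\n",
           nr_pages, pfn, (unsigned long long)paddr);

    /* Old xenheap callers cast to/from unsigned long at every call site;
     * with void * in the interface those casts disappear. */
    void *page = alloc_xenheap_page();
    memset(page, 0, PAGE_SIZE);
    free_xenheap_page(page);

    return 0;
}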